region/lib.rs
1#![deny(
2 clippy::all,
3 clippy::missing_inline_in_public_items,
4 clippy::ptr_as_ptr,
5 clippy::print_stdout,
6 missing_docs,
7 nonstandard_style,
8 unused,
9 warnings
10)]
11// Temporarily allow these until bitflags deps is upgraded to 2.x
12#![allow(clippy::bad_bit_mask)]
13//! Cross-platform virtual memory API.
14//!
15//! This crate provides a cross-platform Rust API for querying and manipulating
16//! virtual memory. It is a thin abstraction, with the underlying interaction
17//! implemented using platform specific APIs (e.g `VirtualQuery`, `VirtualLock`,
18//! `mprotect`, `mlock`). Albeit not all OS specific quirks are abstracted away;
//! for instance, some OSs enforce memory pages to be readable, whilst others may
20//! prevent pages from becoming executable (i.e DEP).
21//!
22//! This implementation operates with memory pages, which are aligned to the
23//! operating system's page size. On some systems, but not all, the system calls
24//! for these operations require input to be aligned to a page boundary. To
25//! remedy this inconsistency, whenever applicable, input is aligned to its
26//! closest page boundary.
27//!
//! *Note: a region is a collection of one or more pages lying consecutively in
29//! memory, with the same properties.*
30//!
31//! # Parallelism
32//!
33//! The properties of virtual memory pages can change at any time, unless all
34//! threads that are unaccounted for in a process are stopped. Therefore to
35//! obtain, e.g., a true picture of a process' virtual memory, all other threads
36//! must be halted. Otherwise, a region descriptor only represents a snapshot in
37//! time.
38//!
39//! # Installation
40//!
41//! This crate is [on crates.io](https://crates.io/crates/region) and can be
42//! used by adding `region` to your dependencies in your project's `Cargo.toml`.
43//!
44//! ```toml
45//! [dependencies]
46//! region = "3.0.2"
47//! ```
48//!
49//! # Examples
50//!
51//! - Cross-platform equivalents.
52//!
53//! ```rust
54//! # unsafe fn example() -> region::Result<()> {
55//! # use region::Protection;
56//! let data = [0xDE, 0xAD, 0xBE, 0xEF];
57//!
58//! // Page size
59//! let pz = region::page::size();
60//! let pc = region::page::ceil(data.as_ptr());
61//! let pf = region::page::floor(data.as_ptr());
62//!
63//! // VirtualQuery | '/proc/self/maps'
64//! let q = region::query(data.as_ptr())?;
65//! let qr = region::query_range(data.as_ptr(), data.len())?;
66//!
67//! // VirtualAlloc | mmap
68//! let alloc = region::alloc(100, Protection::READ_WRITE)?;
69//!
70//! // VirtualProtect | mprotect
71//! region::protect(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
72//!
73//! // ... you can also temporarily change one or more pages' protection
74//! let handle = region::protect_with_handle(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
75//!
76//! // VirtualLock | mlock
77//! let guard = region::lock(data.as_ptr(), data.len())?;
78//! # Ok(())
79//! # }
80//! ```
81
82#[macro_use]
83extern crate bitflags;
84
85pub use alloc::{alloc, alloc_at, Allocation};
86pub use error::{Error, Result};
87pub use lock::{lock, unlock, LockGuard};
88pub use protect::{protect, protect_with_handle, ProtectGuard};
89pub use query::{query, query_range, QueryIter};
90
91mod alloc;
92mod error;
93mod lock;
94mod os;
95pub mod page;
96mod protect;
97mod query;
98mod util;
99
/// A descriptor for a mapped memory region.
///
/// The region encompasses zero or more pages (e.g. OpenBSD can have null-sized
/// virtual pages).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Region {
  /// Base address of the region; always aligned to the operating system's
  /// page size (see [`Region::as_ptr`]).
  base: *const (),
  /// Whether the region is reserved but not committed (e.g. `MEM_RESERVE`
  /// pages on Windows); the inverse of [`Region::is_committed`].
  reserved: bool,
  /// Whether the region is guarded or not (OS-specific semantics).
  guarded: bool,
  /// Current protection attributes of the region.
  protection: Protection,
  /// Maximum protection of the region — presumably only populated on OSs that
  /// report it (TODO confirm against the `os` backends).
  max_protection: Protection,
  /// Whether the region is shared between processes or not.
  shared: bool,
  /// Size of the region in bytes (always a multiple of the page size).
  size: usize,
}
121
122impl Region {
123 /// Returns a pointer to the region's base address.
124 ///
125 /// The address is always aligned to the operating system's page size.
126 #[inline(always)]
127 pub fn as_ptr<T>(&self) -> *const T {
128 self.base.cast()
129 }
130
131 /// Returns a mutable pointer to the region's base address.
132 #[inline(always)]
133 pub fn as_mut_ptr<T>(&mut self) -> *mut T {
134 self.base as *mut T
135 }
136
137 /// Returns two raw pointers spanning the region's address space.
138 ///
139 /// The returned range is half-open, which means that the end pointer points
140 /// one past the last element of the region. This way, an empty region is
141 /// represented by two equal pointers, and the difference between the two
142 /// pointers represents the size of the region.
143 #[inline(always)]
144 pub fn as_ptr_range<T>(&self) -> std::ops::Range<*const T> {
145 let range = self.as_range();
146 (range.start as *const T)..(range.end as *const T)
147 }
148
149 /// Returns two mutable raw pointers spanning the region's address space.
150 #[inline(always)]
151 pub fn as_mut_ptr_range<T>(&mut self) -> std::ops::Range<*mut T> {
152 let range = self.as_range();
153 (range.start as *mut T)..(range.end as *mut T)
154 }
155
156 /// Returns a range spanning the region's address space.
157 #[inline(always)]
158 pub fn as_range(&self) -> std::ops::Range<usize> {
159 (self.base as usize)..(self.base as usize).saturating_add(self.size)
160 }
161
162 /// Returns whether the region is committed or not.
163 ///
164 /// This is always true for all operating system's, the exception being
165 /// `MEM_RESERVE` pages on Windows.
166 #[inline(always)]
167 pub fn is_committed(&self) -> bool {
168 !self.reserved
169 }
170
171 /// Returns whether the region is readable or not.
172 #[inline(always)]
173 pub fn is_readable(&self) -> bool {
174 self.protection & Protection::READ == Protection::READ
175 }
176
177 /// Returns whether the region is writable or not.
178 #[inline(always)]
179 pub fn is_writable(&self) -> bool {
180 self.protection & Protection::WRITE == Protection::WRITE
181 }
182
183 /// Returns whether the region is executable or not.
184 #[inline(always)]
185 pub fn is_executable(&self) -> bool {
186 self.protection & Protection::EXECUTE == Protection::EXECUTE
187 }
188
189 /// Returns whether the region is guarded or not.
190 #[inline(always)]
191 pub fn is_guarded(&self) -> bool {
192 self.guarded
193 }
194
195 /// Returns whether the region is shared between processes or not.
196 #[inline(always)]
197 pub fn is_shared(&self) -> bool {
198 self.shared
199 }
200
201 /// Returns the size of the region in bytes.
202 ///
203 /// The size is always aligned to a multiple of the operating system's page
204 /// size.
205 #[inline(always)]
206 pub fn len(&self) -> usize {
207 self.size
208 }
209
210 /// Returns whether region is empty or not.
211 #[inline(always)]
212 pub fn is_empty(&self) -> bool {
213 self.size == 0
214 }
215
216 /// Returns the protection attributes of the region.
217 #[inline(always)]
218 pub fn protection(&self) -> Protection {
219 self.protection
220 }
221}
222
223impl Default for Region {
224 #[inline]
225 fn default() -> Self {
226 Self {
227 base: std::ptr::null(),
228 reserved: false,
229 guarded: false,
230 protection: Protection::NONE,
231 max_protection: Protection::NONE,
232 shared: false,
233 size: 0,
234 }
235 }
236}
237
// SAFETY: `Region` is a plain-data descriptor (Copy fields plus a raw
// pointer). The `base` pointer is only stored and handed back to callers —
// `Region` itself never dereferences it — so moving or sharing a descriptor
// across threads cannot cause a data race.
unsafe impl Send for Region {}
unsafe impl Sync for Region {}
240
bitflags! {
  /// A bitflag of zero or more protection attributes.
  ///
  /// Determines the access rights for a specific page and/or region. Some
  /// combination of flags may not be applicable, depending on the OS (e.g macOS
  /// enforces executable pages to be readable, OpenBSD requires W^X).
  ///
  /// # OS-Specific Behavior
  ///
  /// On Unix `Protection::from_bits_unchecked` can be used to apply
  /// non-standard flags (e.g. `PROT_BTI`).
  ///
  /// # Examples
  ///
  /// ```
  /// use region::Protection;
  ///
  /// let combine = Protection::READ | Protection::WRITE;
  /// let shorthand = Protection::READ_WRITE;
  /// ```
  #[derive(Default)]
  pub struct Protection: usize {
    /// No access allowed at all.
    const NONE = 0;
    /// Read access; writing and/or executing data will panic.
    const READ = (1 << 0);
    /// Write access; this flag alone may not be supported on all OSs.
    const WRITE = (1 << 1);
    /// Execute access; this may not be allowed depending on DEP.
    const EXECUTE = (1 << 2);
    // NOTE: `Self::FLAG.bits` (field access) is bitflags 1.x syntax and is the
    // reason for the crate-level `allow(clippy::bad_bit_mask)`; it becomes
    // `bits()` once the dependency is upgraded to 2.x.
    /// Read and execute shorthand.
    const READ_EXECUTE = (Self::READ.bits | Self::EXECUTE.bits);
    /// Read and write shorthand.
    const READ_WRITE = (Self::READ.bits | Self::WRITE.bits);
    /// Read, write and execute shorthand.
    const READ_WRITE_EXECUTE = (Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits);
    /// Write and execute shorthand.
    const WRITE_EXECUTE = (Self::WRITE.bits | Self::EXECUTE.bits);
  }
}
281
282impl std::fmt::Display for Protection {
283 #[inline]
284 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
285 const MAPPINGS: &[(Protection, char)] = &[
286 (Protection::READ, 'r'),
287 (Protection::WRITE, 'w'),
288 (Protection::EXECUTE, 'x'),
289 ];
290
291 for (flag, symbol) in MAPPINGS {
292 if self.contains(*flag) {
293 write!(f, "{}", symbol)?;
294 } else {
295 write!(f, "-")?;
296 }
297 }
298
299 Ok(())
300 }
301}
302
303#[cfg(test)]
304mod tests {
305 use super::*;
306
307 #[test]
308 fn protection_implements_display() {
309 assert_eq!(Protection::READ.to_string(), "r--");
310 assert_eq!(Protection::READ_WRITE.to_string(), "rw-");
311 assert_eq!(Protection::READ_WRITE_EXECUTE.to_string(), "rwx");
312 assert_eq!(Protection::WRITE.to_string(), "-w-");
313 }
314
315 #[cfg(unix)]
316 pub mod util {
317 use crate::{page, Protection};
318 use mmap::{MapOption, MemoryMap};
319 use std::ops::Deref;
320
321 struct AllocatedPages(Vec<MemoryMap>);
322
323 impl Deref for AllocatedPages {
324 type Target = [u8];
325
326 fn deref(&self) -> &Self::Target {
327 unsafe { std::slice::from_raw_parts(self.0[0].data().cast(), self.0.len() * page::size()) }
328 }
329 }
330
331 #[allow(clippy::fallible_impl_from)]
332 impl From<Protection> for &'static [MapOption] {
333 fn from(protection: Protection) -> Self {
334 match protection {
335 Protection::NONE => &[],
336 Protection::READ => &[MapOption::MapReadable],
337 Protection::READ_WRITE => &[MapOption::MapReadable, MapOption::MapWritable],
338 Protection::READ_EXECUTE => &[MapOption::MapReadable, MapOption::MapExecutable],
339 _ => panic!("Unsupported protection {:?}", protection),
340 }
341 }
342 }
343
344 /// Allocates one or more sequential pages for each protection flag.
345 pub fn alloc_pages(pages: &[Protection]) -> impl Deref<Target = [u8]> {
346 // Find a region that fits all pages
347 let region = MemoryMap::new(page::size() * pages.len(), &[]).expect("allocating pages");
348 let mut page_address = region.data();
349
350 // Drop the region to ensure it's free
351 std::mem::forget(region);
352
353 // Allocate one page at a time, with explicit page permissions. This would
354 // normally introduce a race condition, but since only one thread is used
355 // during testing, it ensures each page remains available (in general,
356 // only one thread should ever be active when querying and/or manipulating
357 // memory regions).
358 let allocated_pages = pages
359 .iter()
360 .map(|protection| {
361 let mut options = vec![MapOption::MapAddr(page_address)];
362 options.extend_from_slice(Into::into(*protection));
363
364 let map = MemoryMap::new(page::size(), &options).expect("allocating page");
365 assert_eq!(map.data(), page_address);
366 assert_eq!(map.len(), page::size());
367
368 page_address = (page_address as usize + page::size()) as *mut _;
369 map
370 })
371 .collect::<Vec<_>>();
372
373 AllocatedPages(allocated_pages)
374 }
375 }
376
  #[cfg(windows)]
  pub mod util {
    use crate::{page, Protection};
    use std::ops::Deref;
    use windows_sys::Win32::System::Memory::{
      VirtualAlloc, VirtualFree, MEM_COMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_NOACCESS,
    };

    /// Owns a reserved-and-committed allocation: base address and total size
    /// in bytes.
    struct AllocatedPages(*const (), usize);

    impl Deref for AllocatedPages {
      type Target = [u8];

      fn deref(&self) -> &Self::Target {
        // SAFETY: `self.0`/`self.1` describe the pages committed in
        // `alloc_pages`, which remain mapped until `Drop` releases them.
        unsafe { std::slice::from_raw_parts(self.0 as *const _, self.1) }
      }
    }

    impl Drop for AllocatedPages {
      fn drop(&mut self) {
        unsafe {
          // With MEM_RELEASE the size must be 0; this frees the entire
          // reservation made by the initial VirtualAlloc.
          assert_ne!(VirtualFree(self.0 as *mut _, 0, MEM_RELEASE), 0);
        }
      }
    }

    /// Allocates one or more sequential pages for each protection flag.
    pub fn alloc_pages(pages: &[Protection]) -> impl Deref<Target = [u8]> {
      // Reserve enough memory to fit each page
      let total_size = page::size() * pages.len();
      let allocation_base =
        unsafe { VirtualAlloc(std::ptr::null_mut(), total_size, MEM_RESERVE, PAGE_NOACCESS) };
      assert_ne!(allocation_base, std::ptr::null_mut());

      let mut page_address = allocation_base;

      // Commit one page at a time with the expected permissions
      for protection in pages {
        let address = unsafe {
          VirtualAlloc(
            page_address,
            page::size(),
            MEM_COMMIT,
            // NOTE(review): `to_native` presumably maps Protection to the
            // Win32 PAGE_* constant (defined in the `os` module) — confirm.
            protection.to_native(),
          )
        };
        // Committing inside an existing reservation returns the requested
        // address, so a mismatch indicates the commit landed elsewhere.
        assert_eq!(address, page_address);
        page_address = (address as usize + page::size()) as *mut _;
      }

      AllocatedPages(allocation_base as *const _, total_size)
    }
  }
430}